void toggle_guest_mode(struct exec_domain *ed)
{
    ed->arch.flags ^= TF_kernel_mode;
-    __asm__ __volatile__ ( "mfence; swapgs" ); /* AMD erratum #88 */
+    /*
+     * safe_swapgs (asm_defns.h) replaces the open-coded sequence.  It must
+     * retain the MFENCE before SWAPGS: AMD erratum #88 allows a speculative
+     * load to use the stale GS base otherwise.  A bare "swapgs" here would
+     * silently drop the workaround that every other converted site keeps.
+     */
+    __asm__ __volatile__ ( safe_swapgs );
    update_pagetables(ed);
    write_ptbase(ed);
}
/* If in kernel mode then switch the GS bases around. */
if ( n->arch.flags & TF_kernel_mode )
/*
 * NOTE(review): safe_swapgs is a string-literal macro (it is concatenated
 * into an asm template at another call site), and presumably expands to the
 * same "mfence; swapgs" sequence it replaces here — confirm it keeps the
 * MFENCE required by AMD erratum #88.
 */
- __asm__ __volatile__ ( "mfence; swapgs" ); /* AMD erratum #88 */
+ __asm__ __volatile__ ( safe_swapgs );
/* Fragment continues past this view: fixup path when a guest segment
 * selector failed to reload cleanly during context switch. */
if ( unlikely(!all_segs_okay) )
{
/*
 * Load the NUL selector into every data-segment register, including both
 * GS contexts: %gs is cleared once, SWAPGS exchanges the active/shadow GS
 * bases, and %gs is cleared again so both states end up with a NUL selector.
 */
static void clear_segments(void)
{
__asm__ __volatile__ (
- "movl %0,%%ds; "
- "movl %0,%%es; "
- "movl %0,%%fs; "
- "movl %0,%%gs; "
- "mfence; swapgs; " /* AMD erratum #88 */
- "movl %0,%%gs"
+ " movl %0,%%ds; "
+ " movl %0,%%es; "
+ " movl %0,%%fs; "
+ " movl %0,%%gs; "
/* safe_swapgs is spliced in via adjacent-string concatenation; it should
 * preserve the MFENCE-before-SWAPGS ordering (AMD erratum #88). */
+ ""safe_swapgs" "
+ " movl %0,%%gs"
: : "r" (0) );
}
#include <xen/init.h>
#include <xen/mm.h>
#include <xen/sched.h>
+#include <asm/asm_defns.h>
#include <asm/page.h>
#include <asm/flushtlb.h>
#include <asm/fixmap.h>
/*
 * Hypercall: set an FS/GS segment base for the current exec domain.
 *
 * The patch drops the up-front canonicalise_virt_address() call and instead
 * uses wrmsr_user(), which (NOTE(review): presumably) is a fault-tolerant
 * WRMSR returning non-zero if the write #GP-faults — e.g. on a non-canonical
 * base — so a bad guest-supplied value now yields -EFAULT rather than being
 * silently sign-extended.  Confirm wrmsr_user's contract in asm headers.
 *
 * Returns 0 on success, -EFAULT on a faulting MSR write, -EINVAL for an
 * unknown 'which'.
 */
long do_set_segment_base(unsigned int which, unsigned long base)
{
struct exec_domain *ed = current;
-
- base = canonicalise_virt_address(base);
+ long ret = 0;
switch ( which )
{
case SEGBASE_FS:
/* Cache the value for context switch, then try the live MSR write. */
ed->arch.user_ctxt.fs_base = base;
- wrmsr(MSR_FS_BASE, base, base>>32);
+ if ( wrmsr_user(MSR_FS_BASE, base, base>>32) )
+ ret = -EFAULT;
break;
case SEGBASE_GS_USER:
/* User GS base lives in the shadow-GS MSR while Xen is running. */
ed->arch.user_ctxt.gs_base_user = base;
- wrmsr(MSR_SHADOW_GS_BASE, base, base>>32);
+ if ( wrmsr_user(MSR_SHADOW_GS_BASE, base, base>>32) )
+ ret = -EFAULT;
break;
case SEGBASE_GS_KERNEL:
ed->arch.user_ctxt.gs_base_kernel = base;
- wrmsr(MSR_GS_BASE, base, base>>32);
+ if ( wrmsr_user(MSR_GS_BASE, base, base>>32) )
+ ret = -EFAULT;
break;
case SEGBASE_GS_USER_SEL:
/*
 * Load a user GS selector: SWAPGS to expose the user GS context, load
 * the selector (the .fixup branch retries with a NUL selector if the
 * load faults), then switch back.  safe_swapgs should keep the AMD
 * erratum #88 MFENCE that the removed literal carried.
 *
 * NOTE(review): this hunk appears truncated here — the asm's
 * constraint list and ".previous"/__ex_table tail are not visible.
 */
__asm__ __volatile__ (
" swapgs \n"
"1: movl %k0,%%gs \n"
- " mfence; swapgs \n" /* AMD erratum #88 */
+ " "safe_swapgs" \n"
".section .fixup,\"ax\" \n"
"2: xorl %k0,%k0 \n"
" jmp 1b \n"
break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
- return 0;
+ return ret;
}
/*
 * Page-table-entry bits a guest may not set in L3/L4 entries:
 * bit 63 (NX) is disallowed only when the CPU lacks NX support
 * (cpu_has_nx), plus bits 7-9 ((7UL << 7)) unconditionally.
 */
#define L3_DISALLOW_MASK ((cpu_has_nx?0:(1UL<<63)) | (7UL << 7))
#define L4_DISALLOW_MASK ((cpu_has_nx?0:(1UL<<63)) | (7UL << 7))
/*
 * Removal of canonicalise_virt_address(): its only visible caller
 * (do_set_segment_base) no longer pre-canonicalises, relying on a faulting
 * MSR write to reject bad bases instead.  The removed helper was also buggy
 * in its own right — it computed v but never executed "return v;", which is
 * undefined behavior when the result is used.
 */
-#ifndef __ASSEMBLY__
-static inline unsigned long canonicalise_virt_address(unsigned long v)
-{
- v &= VADDR_MASK;
- if ( v & (1UL << (VADDR_BITS - 1)) )
- v |= ~VADDR_MASK;
-}
-#endif /* !__ASSEMBLY__ */
-
#endif /* __X86_64_PAGE_H__ */
/*